bitkeeper revision 1.1159.59.1 (412f52e5pzGsSRKxWkXlLmoWzjYc7g)
author kaf24@freefall.cl.cam.ac.uk <kaf24@freefall.cl.cam.ac.uk>
Fri, 27 Aug 2004 15:27:33 +0000 (15:27 +0000)
committer kaf24@freefall.cl.cam.ac.uk <kaf24@freefall.cl.cam.ac.uk>
Fri, 27 Aug 2004 15:27:33 +0000 (15:27 +0000)
Add debugging for locked critical regions. Allows us to assert that
certain things don't happen while in a c.r.: currently these include
taking page faults and GPFs, and also we disallow use of the
user-space access macros (uaccess.h).

xen/arch/x86/nmi.c
xen/arch/x86/traps.c
xen/common/kernel.c
xen/include/asm-x86/spinlock.h
xen/include/asm-x86/x86_32/uaccess.h
xen/include/xen/spinlock.h

index ec12834ee73d915eac7e2dc057bfd1d1b4c23a46..d2554aeffcc51fba00274fd32e94b17c02c753d5 100644 (file)
@@ -288,6 +288,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
         if ( alert_counter[cpu] == 5*nmi_hz )
         {
             console_force_unlock();
+            disable_criticalregion_checking();
             die("NMI Watchdog detected LOCKUP on CPU", regs, cpu);
         }
     } 
index 46d557f59da1870d987f450aa17e061b3d1519a7..24f1d401d245addfe3145af3eb249873d89ab870 100644 (file)
@@ -315,6 +315,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
 
     perfc_incrc(page_faults);
 
+    ASSERT_no_criticalregion();
+
     if ( unlikely(addr >= LDT_VIRT_START) && 
          (addr < (LDT_VIRT_START + (d->mm.ldt_ents*LDT_ENTRY_SIZE))) )
     {
@@ -413,6 +415,8 @@ asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
     trap_info_t *ti;
     unsigned long fixup;
 
+    ASSERT_no_criticalregion();
+
     /* Badness if error in ring 0, or result of an interrupt. */
     if ( !(regs->xcs & 3) || (error_code & 1) )
         goto gp_in_kernel;
@@ -493,6 +497,7 @@ asmlinkage void do_general_protection(struct pt_regs *regs, long error_code)
 asmlinkage void mem_parity_error(struct pt_regs *regs)
 {
     console_force_unlock();
+    disable_criticalregion_checking();
 
     printk("\n\n");
 
@@ -513,6 +518,7 @@ asmlinkage void mem_parity_error(struct pt_regs *regs)
 asmlinkage void io_check_error(struct pt_regs *regs)
 {
     console_force_unlock();
+    disable_criticalregion_checking();
 
     printk("\n\n");
 
index 3e37bded7dc82e363577fec8d93b305006780afc..6d70547321df9783de9a3bbf98e2e98a9d43938c 100644 (file)
@@ -389,3 +389,44 @@ long do_ni_hypercall(void)
     /* No-op hypercall. */
     return -ENOSYS;
 }
+
+/*
+ * Lock debugging
+ */
+
+#ifndef NDEBUG
+
+static int crit_count[NR_CPUS];
+static int crit_checking = 1;
+
+void disable_criticalregion_checking(void)
+{
+    crit_checking = 0;
+}
+
+void criticalregion_enter(void)
+{
+    int cpu = smp_processor_id();
+    ASSERT(crit_count[cpu] >= 0);
+    crit_count[cpu]++;
+}
+
+void criticalregion_exit(void)
+{
+    int cpu = smp_processor_id();
+    crit_count[cpu]--;
+    ASSERT(crit_count[cpu] >= 0);
+}
+
+/*
+ * Assert that the current CPU holds no spinlocks (is not inside a critical
+ * region). Called from fault paths (page fault, GPF) and the uaccess macros,
+ * which must never run with a lock held. No-op once checking is disabled
+ * (e.g. after a watchdog-detected lockup, so the crash path can proceed).
+ */
+void ASSERT_no_criticalregion(void)
+{
+    int cpu = smp_processor_id();
+    if ( (crit_count[cpu] == 0) || !crit_checking )
+        return;
+    /* Checking is disabled first so the ASSERT's own printk/crash path
+     * does not recursively trip this check when it takes locks. */
+    disable_criticalregion_checking();
+    ASSERT(crit_count[cpu] >= 0); /* -ve count is a special kind of bogus! */
+    ASSERT(crit_count[cpu] == 0); /* we should definitely take this path   */
+    ASSERT(0); /* NEVER GET HERE! ASSERT(1) was a no-op and could not fire. */
+}
+
+#endif /* !NDEBUG */
index dd9869d331264a29f9c7628c85cb2e3b87699d85..1d864188dac1060e0448a2049c935d8d58874aa1 100644 (file)
@@ -17,7 +17,7 @@ typedef struct {
 #define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
 #define spin_is_locked(x)      (*(volatile char *)(&(x)->lock) <= 0)
 
-static inline void spin_lock(spinlock_t *lock)
+static inline void _raw_spin_lock(spinlock_t *lock)
 {
     __asm__ __volatile__ (
         "1:  lock; decb %0         \n"
@@ -31,7 +31,7 @@ static inline void spin_lock(spinlock_t *lock)
         : "=m" (lock->lock) : : "memory" );
 }
 
-static inline void spin_unlock(spinlock_t *lock)
+static inline void _raw_spin_unlock(spinlock_t *lock)
 {
 #if !defined(CONFIG_X86_OOSTORE)
     ASSERT(spin_is_locked(lock));
@@ -47,7 +47,7 @@ static inline void spin_unlock(spinlock_t *lock)
 #endif
 }
 
-static inline int spin_trylock(spinlock_t *lock)
+static inline int _raw_spin_trylock(spinlock_t *lock)
 {
     char oldval;
     __asm__ __volatile__(
@@ -64,7 +64,7 @@ static inline int spin_trylock(spinlock_t *lock)
  * are any critical regions that cannot form part of such a set, they can use
  * standard spin_[un]lock().
  */
-#define spin_lock_recursive(_lock)                 \
+#define _raw_spin_lock_recursive(_lock)            \
     do {                                           \
         int cpu = smp_processor_id();              \
         if ( likely((_lock)->recurse_cpu != cpu) ) \
@@ -75,7 +75,7 @@ static inline int spin_trylock(spinlock_t *lock)
         (_lock)->recurse_cnt++;                    \
     } while ( 0 )
 
-#define spin_unlock_recursive(_lock)               \
+#define _raw_spin_unlock_recursive(_lock)          \
     do {                                           \
         if ( likely(--(_lock)->recurse_cnt == 0) ) \
         {                                          \
@@ -97,32 +97,23 @@ typedef struct {
  * On x86, we implement read-write locks as a 32-bit counter
  * with the high bit (sign) being the "contended" bit.
  */
-static inline void read_lock(rwlock_t *rw)
+static inline void _raw_read_lock(rwlock_t *rw)
 {
     __build_read_lock(rw, "__read_lock_failed");
 }
 
-static inline void write_lock(rwlock_t *rw)
+static inline void _raw_write_lock(rwlock_t *rw)
 {
     __build_write_lock(rw, "__write_lock_failed");
 }
 
-#define read_unlock(rw)                            \
+#define _raw_read_unlock(rw)                       \
     __asm__ __volatile__ (                         \
         "lock ; incl %0" :                         \
         "=m" ((rw)->lock) : : "memory" )
-#define write_unlock(rw)                           \
+#define _raw_write_unlock(rw)                      \
     __asm__ __volatile__ (                         \
         "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
         "=m" ((rw)->lock) : : "memory" )
 
-static inline int write_trylock(rwlock_t *lock)
-{
-    atomic_t *count = (atomic_t *)lock;
-    if ( atomic_sub_and_test(RW_LOCK_BIAS, count) )
-        return 1;
-    atomic_add(RW_LOCK_BIAS, count);
-    return 0;
-}
-
 #endif /* __ASM_SPINLOCK_H */
index 776639782c50e1b2c01ccc7dc0905dbd85a377f4..58829fa204a169ef73039a610cdb4d4823b682f9 100644 (file)
@@ -243,6 +243,7 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+       ASSERT_no_criticalregion();                                     \
        __asm__ __volatile__(                                           \
                "1:     mov"itype" %"rtype"1,%2\n"                      \
                "2:\n"                                                  \
@@ -291,6 +292,7 @@ do {                                                                        \
 } while (0)
 
 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)      \
+       ASSERT_no_criticalregion();                                     \
        __asm__ __volatile__(                                           \
                "1:     mov"itype" %2,%"rtype"1\n"                      \
                "2:\n"                                                  \
@@ -334,6 +336,7 @@ unsigned long __copy_from_user_ll(void *to, const void __user *from, unsigned lo
 static always_inline unsigned long
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+       ASSERT_no_criticalregion();
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
@@ -372,6 +375,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 static always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       ASSERT_no_criticalregion();
        if (__builtin_constant_p(n)) {
                unsigned long ret;
 
index b74b5f802fb243849b81c2737514b2acc04c2fba..0b4503d999d6dffd3cf4c21392592283d5c8f904 100644 (file)
@@ -48,14 +48,13 @@ typedef struct { int gcc_is_buggy; } spinlock_t;
 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
 #endif
 
-#define spin_lock_init(lock)    do { } while(0)
-#define spin_lock(lock)         (void)(lock) /* Not "unused variable". */
-#define spin_is_locked(lock)    (0)
-#define spin_trylock(lock)      ({1; })
-#define spin_unlock_wait(lock)  do { } while(0)
-#define spin_unlock(lock)       do { } while(0)
-#define spin_lock_recursive(lock)   do { } while(0)
-#define spin_unlock_recursive(lock) do { } while(0)
+#define spin_lock_init(lock)             do { } while(0)
+#define spin_is_locked(lock)             (0)
+#define _raw_spin_lock(lock)             (void)(lock)
+#define _raw_spin_trylock(lock)          ({1; })
+#define _raw_spin_unlock(lock)           do { } while(0)
+#define _raw_spin_lock_recursive(lock)   do { } while(0)
+#define _raw_spin_unlock_recursive(lock) do { } while(0)
 
 #if (__GNUC__ > 2)
 typedef struct { } rwlock_t;
@@ -65,11 +64,63 @@ typedef struct { int gcc_is_buggy; } rwlock_t;
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
 #endif
 
-#define rwlock_init(lock)       do { } while(0)
-#define read_lock(lock)         (void)(lock) /* Not "unused variable". */
-#define read_unlock(lock)       do { } while(0)
-#define write_lock(lock)        (void)(lock) /* Not "unused variable". */
-#define write_unlock(lock)      do { } while(0)
+#define rwlock_init(lock)            do { } while(0)
+#define _raw_read_lock(lock)         (void)(lock) /* Not "unused variable". */
+#define _raw_read_unlock(lock)       do { } while(0)
+#define _raw_write_lock(lock)        (void)(lock) /* Not "unused variable". */
+#define _raw_write_unlock(lock)      do { } while(0)
+
+#endif
+
+#ifndef NDEBUG
+
+extern void criticalregion_enter(void);
+extern void criticalregion_exit(void);
+extern void ASSERT_no_criticalregion(void);
+extern void disable_criticalregion_checking(void);
+
+#define spin_lock(_lock) \
+    do { criticalregion_enter(); _raw_spin_lock(_lock); } while (0)
+#define spin_unlock(_lock) \
+    do { _raw_spin_unlock(_lock); criticalregion_exit(); } while (0)
+#define spin_lock_recursive(_lock) \
+    do { criticalregion_enter(); _raw_spin_lock_recursive(_lock); } while (0)
+#define spin_unlock_recursive(_lock) \
+    do { _raw_spin_unlock_recursive(_lock); criticalregion_exit(); } while (0)
+#define read_lock(_lock) \
+    do { criticalregion_enter(); _raw_read_lock(_lock); } while (0)
+#define read_unlock(_lock) \
+    do { _raw_read_unlock(_lock); criticalregion_exit(); } while (0)
+#define write_lock(_lock) \
+    do { criticalregion_enter(); _raw_write_lock(_lock); } while (0)
+#define write_unlock(_lock) \
+    do { _raw_write_unlock(_lock); criticalregion_exit(); } while (0)
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+    criticalregion_enter();
+    if ( !_raw_spin_trylock(lock) )
+    {
+        criticalregion_exit();
+        return 0;
+    }
+    return 1;
+}
+
+#else
+
+#define ASSERT_no_criticalregion()        ((void)0)
+#define disable_criticalregion_checking() ((void)0)
+
+#define spin_lock(_lock)             _raw_spin_lock(_lock)
+#define spin_trylock(_lock)          _raw_spin_trylock(_lock)
+#define spin_unlock(_lock)           _raw_spin_unlock(_lock)
+#define spin_lock_recursive(_lock)   _raw_spin_lock_recursive(_lock)
+#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
+#define read_lock(_lock)             _raw_read_lock(_lock)
+#define read_unlock(_lock)           _raw_read_unlock(_lock)
+#define write_lock(_lock)            _raw_write_lock(_lock)
+#define write_unlock(_lock)          _raw_write_unlock(_lock)
 
 #endif